bitkeeper revision 1.1108.23.3 (41065b4fZjYRRIBRamPXu6SAWqoNVQ)
author kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 27 Jul 2004 13:40:31 +0000 (13:40 +0000)
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 27 Jul 2004 13:40:31 +0000 (13:40 +0000)
Reformat the pfn_info structure.
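
This changeset moves the per-frame bookkeeping fields of struct pfn_info into a union: while a frame is allocated the union holds the owning domain plus the count and type words, and while the frame sits on the free list the same space records the mask of possibly-tainted TLBs and the order of the free chunk (exposed via the new PFN_ORDER() macro). Call sites change accordingly: count_and_flags becomes u.inuse.count_info, type_and_flags becomes u.inuse.type_info, u.domain becomes u.inuse.domain, and u.cpu_mask becomes u.free.cpu_mask. For reference, the reorganised structure as defined by the updated xen/include/asm-x86/mm.h (reproduced from the diff below):

    /* New layout of the per-page-frame bookkeeping structure. */
    struct pfn_info
    {
        /* Each frame can be threaded onto a doubly-linked list. */
        struct list_head list;

        /* Context-dependent fields follow... */
        union {
            /* Page is in use by a domain. */
            struct {
                /* Owner of this page. */
                struct domain *domain;
                /* Reference count and various PGC_xxx flags and fields. */
                u32 count_info;
                /* Type reference count and various PGT_xxx flags and fields. */
                u32 type_info;
            } inuse;

            /* Page is on a free list. */
            struct {
                /* Mask of possibly-tainted TLBs. */
                unsigned long cpu_mask;
                /* Must be at same offset as 'u.inuse.count_info'. */
                u32 __unavailable;
                /* Order-size of the free chunk this page is the head of. */
                u8 order;
            } free;
        } u;

        /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
        u32 tlbflush_timestamp;
    };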

xen/arch/x86/domain.c
xen/arch/x86/memory.c
xen/arch/x86/shadow.c
xen/arch/x86/x86_32/mm.c
xen/common/dom0_ops.c
xen/common/dom_mem_ops.c
xen/common/domain.c
xen/common/keyhandler.c
xen/common/memory.c
xen/include/asm-x86/mm.h
xen/include/asm-x86/shadow.h

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index eb0590f9d2b377f79bcaa7212f68cdae794361d3..b189cc7732dae5f573e6ac892221db7c4b7bfaf9 100644
@@ -467,7 +467,7 @@ void domain_relinquish_memory(struct domain *d)
 
     /* Relinquish Xen-heap pages. Currently this can only be 'shared_info'. */
     page = virt_to_page(d->shared_info);
-    if ( test_and_clear_bit(_PGC_allocated, &page->count_and_flags) )
+    if ( test_and_clear_bit(_PGC_allocated, &page->u.inuse.count_info) )
         put_page(page);
 
     /* Relinquish all pages on the domain's allocation list. */
@@ -476,10 +476,10 @@ void domain_relinquish_memory(struct domain *d)
     {
         page = list_entry(ent, struct pfn_info, list);
 
-        if ( test_and_clear_bit(_PGC_guest_pinned, &page->count_and_flags) )
+        if ( test_and_clear_bit(_PGC_guest_pinned, &page->u.inuse.count_info) )
             put_page_and_type(page);
 
-        if ( test_and_clear_bit(_PGC_allocated, &page->count_and_flags) )
+        if ( test_and_clear_bit(_PGC_allocated, &page->u.inuse.count_info) )
             put_page(page);
 
         /*
@@ -488,13 +488,13 @@ void domain_relinquish_memory(struct domain *d)
          * are not shared across domains and this domain is now dead. Thus base
          * tables are not in use so a non-zero count means circular reference.
          */
-        y = page->type_and_flags;
+        y = page->u.inuse.type_info;
         do {
             x = y;
             if ( likely((x & (PGT_type_mask|PGT_validated)) != 
                         (PGT_base_page_table|PGT_validated)) )
                 break;
-            y = cmpxchg(&page->type_and_flags, x, x & ~PGT_validated);
+            y = cmpxchg(&page->u.inuse.type_info, x, x & ~PGT_validated);
             if ( likely(y == x) )
                 free_page_type(page, PGT_base_page_table);
         }
@@ -654,9 +654,9 @@ int construct_dom0(struct domain *p,
           mfn++ )
     {
         page = &frame_table[mfn];
-        page->u.domain        = p;
-        page->type_and_flags  = 0;
-        page->count_and_flags = PGC_allocated | 1;
+        page->u.inuse.domain        = p;
+        page->u.inuse.type_info  = 0;
+        page->u.inuse.count_info = PGC_allocated | 1;
         list_add_tail(&page->list, &p->page_list);
         p->tot_pages++; p->max_pages++;
     }
@@ -701,7 +701,7 @@ int construct_dom0(struct domain *p,
         *l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);
         
         page = &frame_table[mfn];
-        set_bit(_PGC_tlb_flush_on_type_change, &page->count_and_flags);
+        set_bit(_PGC_tlb_flush_on_type_change, &page->u.inuse.count_info);
         if ( !get_page_and_type(page, p, PGT_writeable_page) )
             BUG();
 
@@ -719,18 +719,18 @@ int construct_dom0(struct domain *p,
         page = &frame_table[l1_pgentry_to_pagenr(*l1tab)];
         if ( count == 0 )
         {
-            page->type_and_flags &= ~PGT_type_mask;
-            page->type_and_flags |= PGT_l2_page_table;
+            page->u.inuse.type_info &= ~PGT_type_mask;
+            page->u.inuse.type_info |= PGT_l2_page_table;
             get_page(page, p); /* an extra ref because of readable mapping */
             /* Get another ref to L2 page so that it can be pinned. */
             if ( !get_page_and_type(page, p, PGT_l2_page_table) )
                 BUG();
-            set_bit(_PGC_guest_pinned, &page->count_and_flags);
+            set_bit(_PGC_guest_pinned, &page->u.inuse.count_info);
         }
         else
         {
-            page->type_and_flags &= ~PGT_type_mask;
-            page->type_and_flags |= PGT_l1_page_table;
+            page->u.inuse.type_info &= ~PGT_type_mask;
+            page->u.inuse.type_info |= PGT_l1_page_table;
             get_page(page, p); /* an extra ref because of readable mapping */
         }
         l1tab++;
diff --git a/xen/arch/x86/memory.c b/xen/arch/x86/memory.c
index cd7ae9b2c67702101a31bd0900b4377e0019e9c7..74b4207babaff00570d6959924af2aa6ef6eea79 100644
@@ -245,7 +245,7 @@ static int get_page_and_type_from_pagenr(unsigned long page_nr,
     if ( unlikely(!get_page_type(page, type)) )
     {
         MEM_LOG("Bad page type for pfn %08lx (%08x)", 
-                page_nr, page->type_and_flags);
+                page_nr, page->u.inuse.type_info);
         put_page(page);
         return 0;
     }
@@ -288,7 +288,7 @@ static int get_linear_pagetable(l2_pgentry_t l2e, unsigned long pfn)
          * If so, atomically increment the count (checking for overflow).
          */
         page = &frame_table[l2_pgentry_to_pagenr(l2e)];
-        y = page->type_and_flags;
+        y = page->u.inuse.type_info;
         do {
             x = y;
             if ( unlikely((x & PGT_count_mask) == PGT_count_mask) ||
@@ -299,7 +299,7 @@ static int get_linear_pagetable(l2_pgentry_t l2e, unsigned long pfn)
                 return 0;
             }
         }
-        while ( (y = cmpxchg(&page->type_and_flags, x, x + 1)) != x );
+        while ( (y = cmpxchg(&page->u.inuse.type_info, x, x + 1)) != x );
     }
 
     return 1;
@@ -339,7 +339,7 @@ static int get_page_from_l1e(l1_pgentry_t l1e)
             pfn, PGT_writeable_page, GPS)) )
             return 0;
         set_bit(_PGC_tlb_flush_on_type_change, 
-                &frame_table[pfn].count_and_flags);
+                &frame_table[pfn].u.inuse.count_info);
         return 1;
     }
 
@@ -383,10 +383,10 @@ static void put_page_from_l1e(l1_pgentry_t l1e)
     else
     {
         /* We expect this is rare so we blow the entire shadow LDT. */
-        if ( unlikely(((page->type_and_flags & PGT_type_mask) == 
+        if ( unlikely(((page->u.inuse.type_info & PGT_type_mask) == 
                        PGT_ldt_page)) &&
-             unlikely(((page->type_and_flags & PGT_count_mask) != 0)) )
-            invalidate_shadow_ldt(page->u.domain);
+             unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) )
+            invalidate_shadow_ldt(page->u.inuse.domain);
         put_page(page);
     }
 }
@@ -424,7 +424,7 @@ static int alloc_l2_table(struct pfn_info *page)
     pl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
         mk_l2_pgentry((page_nr << PAGE_SHIFT) | __PAGE_HYPERVISOR);
     pl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry(__pa(page->u.domain->mm.perdomain_pt) | 
+        mk_l2_pgentry(__pa(page->u.inuse.domain->mm.perdomain_pt) | 
                       __PAGE_HYPERVISOR);
 #endif
 
@@ -617,9 +617,9 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e)
 int alloc_page_type(struct pfn_info *page, unsigned int type)
 {
     if ( unlikely(test_and_clear_bit(_PGC_tlb_flush_on_type_change, 
-                                     &page->count_and_flags)) )
+                                     &page->u.inuse.count_info)) )
     {
-        struct domain *p = page->u.domain;
+        struct domain *p = page->u.inuse.domain;
         if ( unlikely(NEED_FLUSH(tlbflush_time[p->processor],
                                  page->tlbflush_timestamp)) )
         {
@@ -713,7 +713,7 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
         }
 
         if ( unlikely(test_and_set_bit(_PGC_guest_pinned, 
-                                       &page->count_and_flags)) )
+                                       &page->u.inuse.count_info)) )
         {
             MEM_LOG("Pfn %08lx already pinned", pfn);
             put_page_and_type(page);
@@ -727,10 +727,10 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
         if ( unlikely(!(okay = get_page_from_pagenr(pfn, PTS))) )
         {
             MEM_LOG("Page %08lx bad domain (dom=%p)",
-                    ptr, page->u.domain);
+                    ptr, page->u.inuse.domain);
         }
         else if ( likely(test_and_clear_bit(_PGC_guest_pinned, 
-                                            &page->count_and_flags)) )
+                                            &page->u.inuse.count_info)) )
         {
             put_page_and_type(page);
             put_page(page);
@@ -874,8 +874,8 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
          * benign reference to the page (PGC_allocated). If that reference
          * disappears then the deallocation routine will safely spin.
          */
-        nd = page->u.domain;
-        y  = page->count_and_flags;
+        nd = page->u.inuse.domain;
+        y  = page->u.inuse.count_info;
         do {
             x = y;
             if ( unlikely((x & (PGC_count_mask|PGC_allocated)) != 
@@ -884,14 +884,14 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
             {
                 MEM_LOG("Bad page values %08lx: ed=%p(%u), sd=%p,"
                         " caf=%08x, taf=%08x\n", page_to_pfn(page),
-                        d, d->domain, nd, x, page->type_and_flags);
+                        d, d->domain, nd, x, page->u.inuse.type_info);
                 okay = 0;
                 goto reassign_fail;
             }
             __asm__ __volatile__(
                 LOCK_PREFIX "cmpxchg8b %3"
                 : "=a" (nd), "=d" (y), "=b" (e),
-                "=m" (*(volatile u64 *)(&page->u.domain))
+                "=m" (*(volatile u64 *)(&page->u.inuse.domain))
                 : "0" (d), "1" (x), "b" (e), "c" (x) );
         } 
         while ( unlikely(nd != d) || unlikely(y != x) );
@@ -985,7 +985,7 @@ int do_mmu_update(mmu_update_t *ureqs, int count, int *success_count)
             }
 
             page = &frame_table[pfn];
-            switch ( (page->type_and_flags & PGT_type_mask) )
+            switch ( (page->u.inuse.type_info & PGT_type_mask) )
             {
             case PGT_l1_page_table: 
                 if ( likely(get_page_type(page, PGT_l1_page_table)) )
diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index e8da050918ce075307e4883311221cf4847b868b..57cd421daa5a1ea959fc04a49418bbf9b670172c 100644
@@ -68,7 +68,7 @@ static inline void free_shadow_page( struct mm_struct *m,
                                      struct pfn_info *pfn_info )
 {
     unsigned long flags;
-    unsigned long type = pfn_info->type_and_flags & PGT_type_mask;
+    unsigned long type = pfn_info->u.inuse.type_info & PGT_type_mask;
 
     m->shadow_page_count--;
 
@@ -77,9 +77,9 @@ static inline void free_shadow_page( struct mm_struct *m,
     else if (type == PGT_l2_page_table)
         perfc_decr(shadow_l2_pages);
     else printk("Free shadow weird page type pfn=%08x type=%08x\n",
-                frame_table-pfn_info, pfn_info->type_and_flags);
+                frame_table-pfn_info, pfn_info->u.inuse.type_info);
     
-    pfn_info->type_and_flags = 0;
+    pfn_info->u.inuse.type_info = 0;
 
     spin_lock_irqsave(&free_list_lock, flags);
     list_add(&pfn_info->list, &free_list);
@@ -147,7 +147,7 @@ static inline int shadow_page_op( struct mm_struct *m, unsigned int op,
     {
        case TABLE_OP_ZERO_L2:
        {
-               if ( (spfn_info->type_and_flags & PGT_type_mask) == 
+               if ( (spfn_info->u.inuse.type_info & PGT_type_mask) == 
              PGT_l2_page_table )
                {
                        unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );
@@ -161,7 +161,7 @@ static inline int shadow_page_op( struct mm_struct *m, unsigned int op,
        
        case TABLE_OP_ZERO_L1:
        {
-               if ( (spfn_info->type_and_flags & PGT_type_mask) == 
+               if ( (spfn_info->u.inuse.type_info & PGT_type_mask) == 
              PGT_l1_page_table )
                {
                        unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );
@@ -173,7 +173,7 @@ static inline int shadow_page_op( struct mm_struct *m, unsigned int op,
 
        case TABLE_OP_FREE_L1:
        {
-               if ( (spfn_info->type_and_flags & PGT_type_mask) == 
+               if ( (spfn_info->u.inuse.type_info & PGT_type_mask) == 
              PGT_l1_page_table )
                {
                        // lock is already held
@@ -564,7 +564,7 @@ unsigned long shadow_l2_table(
 
     ASSERT( spfn_info ); // XXX deal with failure later e.g. blow cache
 
-    spfn_info->type_and_flags = PGT_l2_page_table;
+    spfn_info->u.inuse.type_info = PGT_l2_page_table;
     perfc_incr(shadow_l2_pages);
 
     spfn = (unsigned long) (spfn_info - frame_table);
@@ -585,7 +585,7 @@ unsigned long shadow_l2_table(
     spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
         mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
     spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry(__pa(frame_table[gpfn].u.domain->mm.perdomain_pt) | 
+        mk_l2_pgentry(__pa(frame_table[gpfn].u.inuse.domain->mm.perdomain_pt) | 
                       __PAGE_HYPERVISOR);
 #endif
 
@@ -728,7 +728,7 @@ int shadow_fault( unsigned long va, long error_code )
             unsigned long *gpl1e, *spl1e;
             int i;
             sl1pfn_info = alloc_shadow_page( &current->mm ); 
-            sl1pfn_info->type_and_flags = PGT_l1_page_table;
+            sl1pfn_info->u.inuse.type_info = PGT_l1_page_table;
                        
             sl1pfn = sl1pfn_info - frame_table;
 
@@ -1017,7 +1017,7 @@ int check_pagetable( struct mm_struct *m, pagetable_t pt, char *s )
             );
 
     if ( (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
-          ((__pa(frame_table[gpfn].u.domain->mm.perdomain_pt) | __PAGE_HYPERVISOR))) )
+          ((__pa(frame_table[gpfn].u.inuse.domain->mm.perdomain_pt) | __PAGE_HYPERVISOR))) )
         FAILPT("hypervisor per-domain map inconsistent");
 
 
diff --git a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c
index 61a8554e050ee3f13c21a91fc114fec33f8dcc35..a38cb6b0ade5e6d6a7e669c3369cd432df5da2b6 100644
@@ -338,7 +338,7 @@ long do_update_descriptor(
         return -EINVAL;
 
     /* Check if the given frame is in use in an unsafe context. */
-    switch ( page->type_and_flags & PGT_type_mask )
+    switch ( page->u.inuse.type_info & PGT_type_mask )
     {
     case PGT_gdt_page:
         /* Disallow updates of Xen-reserved descriptors in the current GDT. */
diff --git a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
index 946cfb106a0a76bd08338e6425cbd5a0f32b4ad1..389cfee61a49231f0b6b69bbfc87fa538e283829 100644
@@ -442,9 +442,9 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
 
             op->u.getpageframeinfo.type = NOTAB;
 
-            if ( (page->type_and_flags & PGT_count_mask) != 0 )
+            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
             {
-                switch ( page->type_and_flags & PGT_type_mask )
+                switch ( page->u.inuse.type_info & PGT_type_mask )
                 {
                 case PGT_l1_page_table:
                     op->u.getpageframeinfo.type = L1TAB;
@@ -645,7 +645,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
                 if ( likely(get_page(page, d)) )
                 {
                     unsigned long type = 0;
-                    switch( page->type_and_flags & PGT_type_mask )
+                    switch( page->u.inuse.type_info & PGT_type_mask )
                     {
                     case PGT_l1_page_table:
                         type = L1TAB;
diff --git a/xen/common/dom_mem_ops.c b/xen/common/dom_mem_ops.c
index 8506a5ba48b35d0aca2a97f7f75a195aba954ee2..8f8980599bf5991ea1ebcc56a5bbacb9f3850f3f 100644
@@ -78,10 +78,10 @@ static long free_dom_mem(struct domain *d,
             break;
         }
 
-        if ( test_and_clear_bit(_PGC_guest_pinned, &page->count_and_flags) )
+        if ( test_and_clear_bit(_PGC_guest_pinned, &page->u.inuse.count_info) )
             put_page_and_type(page);
 
-        if ( test_and_clear_bit(_PGC_allocated, &page->count_and_flags) )
+        if ( test_and_clear_bit(_PGC_allocated, &page->u.inuse.count_info) )
             put_page(page);
 
         put_page(page);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 111210c5e7ced04f6031733fae8c976f52f64416..3df5013879867782af10348fdb227ea908c31bc0 100644
@@ -214,7 +214,7 @@ struct pfn_info *alloc_domain_page(struct domain *d)
     if ( unlikely(page == NULL) )
         return NULL;
 
-    if ( (mask = page->u.cpu_mask) != 0 )
+    if ( (mask = page->u.free.cpu_mask) != 0 )
     {
         pfn_stamp = page->tlbflush_timestamp;
         for ( i = 0; (mask != 0) && (i < smp_num_cpus); i++ )
@@ -234,8 +234,8 @@ struct pfn_info *alloc_domain_page(struct domain *d)
         }
     }
 
-    page->u.domain = d;
-    page->type_and_flags = 0;
+    page->u.inuse.domain = d;
+    page->u.inuse.type_info = 0;
     if ( d != NULL )
     {
         wmb(); /* Domain pointer must be visible before updating refcnt. */
@@ -248,7 +248,7 @@ struct pfn_info *alloc_domain_page(struct domain *d)
             goto free_and_exit;
         }
         list_add_tail(&page->list, &d->page_list);
-        page->count_and_flags = PGC_allocated | 1;
+        page->u.inuse.count_info = PGC_allocated | 1;
         if ( unlikely(d->tot_pages++ == 0) )
             get_domain(d);
         spin_unlock(&d->page_alloc_lock);
@@ -268,7 +268,7 @@ void free_domain_page(struct pfn_info *page)
 {
     unsigned long  flags;
     int            drop_dom_ref;
-    struct domain *d = page->u.domain;
+    struct domain *d = page->u.inuse.domain;
 
     if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
     {
@@ -279,7 +279,7 @@ void free_domain_page(struct pfn_info *page)
     else
     {
         page->tlbflush_timestamp = tlbflush_clock;
-        page->u.cpu_mask = 1 << d->processor;
+        page->u.free.cpu_mask = 1 << d->processor;
         
         /* NB. May recursively lock from domain_relinquish_memory(). */
         spin_lock_recursive(&d->page_alloc_lock);
@@ -287,7 +287,7 @@ void free_domain_page(struct pfn_info *page)
         drop_dom_ref = (--d->tot_pages == 0);
         spin_unlock_recursive(&d->page_alloc_lock);
 
-        page->count_and_flags = 0;
+        page->u.inuse.count_info = 0;
         
         spin_lock_irqsave(&free_list_lock, flags);
         list_add(&page->list, &free_list);
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index aff7ccfd1b62851dd26f0c0ec634df9f10748aeb..6b5872cfbebbc9f4dab74b01af28bce5d3e881d3 100644
@@ -82,15 +82,15 @@ void do_task_queues(unsigned char key, void *dev_id,
             {
                 page = list_entry(ent, struct pfn_info, list);
                 printk("Page %08x: caf=%08x, taf=%08x\n",
-                       page_to_phys(page), page->count_and_flags,
-                       page->type_and_flags);
+                       page_to_phys(page), page->u.inuse.count_info,
+                       page->u.inuse.type_info);
             }
         }
 
         page = virt_to_page(d->shared_info);
         printk("Shared_info@%08x: caf=%08x, taf=%08x\n",
-               page_to_phys(page), page->count_and_flags,
-               page->type_and_flags);
+               page_to_phys(page), page->u.inuse.count_info,
+               page->u.inuse.type_info);
                
         printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n", 
                d->shared_info->vcpu_data[0].evtchn_upcall_pending, 
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 039d0b0e3cb038d8b0079902bba28e6535236dd0..46b886e5ab50bed780477e8b6b8726f04d59e2af 100644
@@ -71,9 +71,9 @@ void __init init_frametable(void *frametable_vstart, unsigned long nr_pages)
           mfn < virt_to_phys(&machine_to_phys_mapping[1<<20])>>PAGE_SHIFT;
           mfn++ )
     {
-        frame_table[mfn].count_and_flags = 1 | PGC_allocated;
-        frame_table[mfn].type_and_flags = 1 | PGT_gdt_page; /* non-RW type */
-        frame_table[mfn].u.domain = &idle0_task;
+        frame_table[mfn].u.inuse.count_info = 1 | PGC_allocated;
+        frame_table[mfn].u.inuse.type_info = 1 | PGT_gdt_page; /* non-RW type */
+        frame_table[mfn].u.inuse.domain = &idle0_task;
     }
 }
 
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index c5e809b15c7eff96ab72a5e80778e6f5dfc2743d..e68ff41d3d88c66e936a69ab8f91515215989254 100644
 
 /*
  * Per-page-frame information.
+ * 
+ * Every architecture must ensure the following:
+ *  1. 'struct pfn_info' contains a 'struct list_head list'.
+ *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
  */
+#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
 
 struct pfn_info
 {
     /* Each frame can be threaded onto a doubly-linked list. */
     struct list_head list;
-    /* The following possible uses are context-dependent. */
+
+    /* Context-dependent fields follow... */
     union {
-        /* Page is in use: we keep a pointer to its owner. */
-        struct domain *domain;
-        /* Page is not currently allocated: mask of possibly-tainted TLBs. */
-        unsigned long cpu_mask;
+
+        /* Page is in use by a domain. */
+        struct {
+            /* Owner of this page. */
+            struct domain *domain;
+            /* Reference count and various PGC_xxx flags and fields. */
+            u32 count_info;
+            /* Type reference count and various PGT_xxx flags and fields. */
+            u32 type_info;
+        } inuse;
+
+        /* Page is on a free list. */
+        struct {
+            /* Mask of possibly-tainted TLBs. */
+            unsigned long cpu_mask;
+            /* Must be at same offset as 'u.inuse.count_info'. */
+            u32 __unavailable;
+            /* Order-size of the free chunk this page is the head of. */
+            u8 order;
+        } free;
+
     } u;
-    /* Reference count and various PGC_xxx flags and fields. */
-    u32 count_and_flags;
-    /* Type reference count and various PGT_xxx flags and fields. */
-    u32 type_and_flags;
+
     /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
     u32 tlbflush_timestamp;
 };
@@ -77,13 +97,13 @@ struct pfn_info
 
 #define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                   \
     do {                                                                    \
-        (_pfn)->u.domain = (_dom);                                          \
+        (_pfn)->u.inuse.domain = (_dom);                                          \
         /* The incremented type count is intended to pin to 'writeable'. */ \
-        (_pfn)->type_and_flags  = PGT_writeable_page | PGT_validated | 1;   \
+        (_pfn)->u.inuse.type_info  = PGT_writeable_page | PGT_validated | 1;   \
         wmb(); /* install valid domain ptr before updating refcnt. */       \
         spin_lock(&(_dom)->page_alloc_lock);                                \
         /* _dom holds an allocation reference */                            \
-        (_pfn)->count_and_flags = PGC_allocated | 1;                        \
+        (_pfn)->u.inuse.count_info = PGC_allocated | 1;                        \
         if ( unlikely((_dom)->xenheap_pages++ == 0) )                       \
             get_domain(_dom);                                               \
         spin_unlock(&(_dom)->page_alloc_lock);                              \
@@ -106,13 +126,13 @@ void free_page_type(struct pfn_info *page, unsigned int type);
 
 static inline void put_page(struct pfn_info *page)
 {
-    u32 nx, x, y = page->count_and_flags;
+    u32 nx, x, y = page->u.inuse.count_info;
 
     do {
         x  = y;
         nx = x - 1;
     }
-    while ( unlikely((y = cmpxchg(&page->count_and_flags, x, nx)) != x) );
+    while ( unlikely((y = cmpxchg(&page->u.inuse.count_info, x, nx)) != x) );
 
     if ( unlikely((nx & PGC_count_mask) == 0) )
         free_domain_page(page);
@@ -122,8 +142,8 @@ static inline void put_page(struct pfn_info *page)
 static inline int get_page(struct pfn_info *page,
                            struct domain *domain)
 {
-    u32 x, nx, y = page->count_and_flags;
-    struct domain *p, *np = page->u.domain;
+    u32 x, nx, y = page->u.inuse.count_info;
+    struct domain *p, *np = page->u.inuse.domain;
 
     do {
         x  = y;
@@ -137,13 +157,13 @@ static inline int get_page(struct pfn_info *page,
                     " caf=%08x, taf=%08x\n",
                     page_to_pfn(page), domain, domain->domain,
                     p, (p && !((x & PGC_count_mask) == 0))?p->domain:999, 
-                    x, page->type_and_flags);
+                    x, page->u.inuse.type_info);
             return 0;
         }
         __asm__ __volatile__(
             LOCK_PREFIX "cmpxchg8b %3"
             : "=a" (np), "=d" (y), "=b" (p),
-              "=m" (*(volatile u64 *)(&page->u.domain))
+              "=m" (*(volatile u64 *)(&page->u.inuse.domain))
             : "0" (p), "1" (x), "b" (p), "c" (nx) );
     }
     while ( unlikely(np != p) || unlikely(y != x) );
@@ -154,7 +174,7 @@ static inline int get_page(struct pfn_info *page,
 
 static inline void put_page_type(struct pfn_info *page)
 {
-    u32 nx, x, y = page->type_and_flags;
+    u32 nx, x, y = page->u.inuse.type_info;
 
  again:
     do {
@@ -171,7 +191,7 @@ static inline void put_page_type(struct pfn_info *page)
                  * 'free' is safe because the refcnt is non-zero and the
                  * validated bit is clear => other ops will spin or fail.
                  */
-                if ( unlikely((y = cmpxchg(&page->type_and_flags, x, 
+                if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, 
                                            x & ~PGT_validated)) != x) )
                     goto again;
                 /* We cleared the 'valid bit' so we must do the clear up. */
@@ -182,13 +202,13 @@ static inline void put_page_type(struct pfn_info *page)
             }
         }
     }
-    while ( unlikely((y = cmpxchg(&page->type_and_flags, x, nx)) != x) );
+    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
 }
 
 
 static inline int get_page_type(struct pfn_info *page, u32 type)
 {
-    u32 nx, x, y = page->type_and_flags;
+    u32 nx, x, y = page->u.inuse.type_info;
  again:
     do {
         x  = y;
@@ -218,7 +238,7 @@ static inline int get_page_type(struct pfn_info *page, u32 type)
         else if ( unlikely(!(x & PGT_validated)) )
         {
             /* Someone else is updating validation of this page. Wait... */
-            while ( (y = page->type_and_flags) != x )
+            while ( (y = page->u.inuse.type_info) != x )
             {
                 rep_nop();
                 barrier();
@@ -226,7 +246,7 @@ static inline int get_page_type(struct pfn_info *page, u32 type)
             goto again;
         }
     }
-    while ( unlikely((y = cmpxchg(&page->type_and_flags, x, nx)) != x) );
+    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
 
     if ( unlikely(!(nx & PGT_validated)) )
     {
@@ -238,7 +258,7 @@ static inline int get_page_type(struct pfn_info *page, u32 type)
             put_page_type(page);
             return 0;
         }
-        set_bit(_PGT_validated, &page->type_and_flags);
+        set_bit(_PGT_validated, &page->u.inuse.type_info);
     }
 
     return 1;
@@ -268,11 +288,11 @@ static inline int get_page_and_type(struct pfn_info *page,
 }
 
 #define ASSERT_PAGE_IS_TYPE(_p, _t)                \
-    ASSERT(((_p)->type_and_flags & PGT_type_mask) == (_t));  \
-    ASSERT(((_p)->type_and_flags & PGT_count_mask) != 0)
+    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t));  \
+    ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
 #define ASSERT_PAGE_IS_DOMAIN(_p, _d)              \
-    ASSERT(((_p)->count_and_flags & PGC_count_mask) != 0);  \
-    ASSERT((_p)->u.domain == (_d))
+    ASSERT(((_p)->u.inuse.count_info & PGC_count_mask) != 0);  \
+    ASSERT((_p)->u.inuse.domain == (_d))
 
 int check_descriptor(unsigned long *d);
 
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 9bce81f1eb2b399906d375bcb73af68fbe194dfc..4ef230854fb82b74f4adee3e529adfc216a86b7b 100644
@@ -120,9 +120,9 @@ static inline int __mark_dirty( struct mm_struct *m, unsigned int mfn )
         SH_LOG("mark_dirty OOR! mfn=%x pfn=%x max=%x (mm %p)",
                mfn, pfn, m->shadow_dirty_bitmap_size, m );
         SH_LOG("dom=%u caf=%08x taf=%08x\n", 
-               frame_table[mfn].u.domain->domain,
-               frame_table[mfn].count_and_flags
-               frame_table[mfn].type_and_flags );
+               frame_table[mfn].u.inuse.domain->domain,
+               frame_table[mfn].u.inuse.count_info,
+               frame_table[mfn].u.inuse.type_info );
     }
 
     return rc;